[IA64] domheap: Allocate privregs from domain heap for VTi domain
authorAlex Williamson <alex.williamson@hp.com>
Thu, 17 Jan 2008 19:05:43 +0000 (12:05 -0700)
committerAlex Williamson <alex.williamson@hp.com>
Thu, 17 Jan 2008 19:05:43 +0000 (12:05 -0700)
- Pin privregs down with both dtr/itr so that privregs can be allocated
  from the domain heap
- Introduce vmx_vpd_pin()/vmx_vpd_unpin().
  The vpd area is pinned down while the vcpu is current. But two
  functions, update_vhpi() and alloc_vpd(), are exceptions.
  We have to pin the area down before making the PAL call.
- Minor twist to the context switch so that it does not use the unpinned
  vpd area: vmx_load_state() needs the vpd area pinned down, so call it
  after vmx_load_all_rr().
- Fix vmx_load_all_rr()
  vmx_switch_rr7() sets psr.ic = 0 so that clearing psr.ic before calling
  vmx_switch_rr7() doesn't make sense.
- Improve vmx_switch_rr7()
  It sets psr.ic = 0 after switching to physical mode, but this can be
  done at the time of the switch itself.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/vmx/vlsapic.c
xen/arch/ia64/vmx/vmx_entry.S
xen/arch/ia64/vmx/vmx_init.c
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_vcpu.c
xen/arch/ia64/xen/domain.c
xen/include/asm-ia64/vmx_vcpu.h
xen/include/asm-ia64/vmx_vpd.h
xen/include/asm-ia64/xenkregs.h

index 2cefd5aacf280a190db5dd8565605b298715daf3..a2728bc5b7bd21ca5a8b7da2f0c2b43cfdd41b11 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx.h>
+#include <asm/vmx_vpd.h>
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
@@ -91,9 +92,12 @@ static void update_vhpi(VCPU *vcpu, int vec)
 
     VCPU(vcpu,vhpi) = vhpi;
     // TODO: Add support for XENO
-    if (VCPU(vcpu,vac).a_int)
+    if (VCPU(vcpu,vac).a_int) {
+        vmx_vpd_pin(vcpu);
         ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, 
                       (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
+        vmx_vpd_unpin(vcpu);
+    }
 }
 
 
index f624554e170e6798cb7dd9d30555045a862cb07f..0eed7746450890ebd0e9496a3b4103d804b06bdb 100644 (file)
@@ -623,14 +623,14 @@ END(ia64_leave_hypercall)
 #define PSR_BITS_TO_CLEAR                                           \
        (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |     \
         IA64_PSR_RT | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI |    \
-        IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH)
+        IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
 #define PSR_BITS_TO_SET    IA64_PSR_BN
 
-//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr );
+//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
 GLOBAL_ENTRY(vmx_switch_rr7)
        // not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-       alloc loc1 = ar.pfs, 3, 7, 0, 0
+       alloc loc1 = ar.pfs, 4, 8, 0, 0
 1:{
        mov r28  = in0                  // copy procedure index
        mov r8   = ip                   // save ip to compute branch
@@ -643,7 +643,12 @@ GLOBAL_ENTRY(vmx_switch_rr7)
        tpa r3 = r8                     // get physical address of ip
        dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
        dep loc6 = 0,in2,60,4           // get physical address of pal code
+       dep loc7 = 0,in3,60,4           // get physical address of privregs
        ;;
+       dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
+                                        // mask granule shift
+       dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
+                                        // mask granule shift
        mov loc4 = psr                  // save psr
        ;;
        mov loc3 = ar.rsc               // save RSE configuration
@@ -661,11 +666,9 @@ GLOBAL_ENTRY(vmx_switch_rr7)
        dep r16=-1,r0,61,3
        ;;
        mov rr[r16]=in0
-       srlz.d
-       ;;
-       rsm 0x6000
        ;;
        srlz.d
+       ;;
 
        // re-pin mappings for kernel text and data
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
@@ -679,6 +682,7 @@ GLOBAL_ENTRY(vmx_switch_rr7)
        mov r16=IA64_TR_KERNEL
        movl r25 = PAGE_KERNEL
        // r2=KERNEL_TR_PAGE_SHIFT truncated physicall address of ip
+       //   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
        dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
        ;;
        or r24=r2,r25
@@ -737,7 +741,9 @@ GLOBAL_ENTRY(vmx_switch_rr7)
        // re-pin mappings for guest_vhpt
        // unless overlaps with IA64_TR_XEN_HEAP_REGS or IA64_TR_CURRENT_STACK
        dep r18=0,loc5,0,KERNEL_TR_PAGE_SHIFT
+       // r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
        dep r21=0,r21,0,IA64_GRANULE_SHIFT 
+       // r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
        dep r17=0,loc5,0,IA64_GRANULE_SHIFT 
        ;;
        cmp.eq p6,p0=r18,r2             // check overlap with xen heap
@@ -771,6 +777,43 @@ GLOBAL_ENTRY(vmx_switch_rr7)
        itr.i itr[r24]=loc6             // wire in new mapping...
        ;;
 
+       // r16, r19, r20 are used by
+       //  ia64_switch_mode_phys()/ia64_switch_mode_virt()
+       // re-pin mappings for privregs
+       // r2   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
+       // r21  = (current physical addr) & (IA64_GRANULE_SIZE - 1)
+       // r17  = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
+
+       // r24  = (privregs physical addr) & (KERNEL_TR_PAGE_SIZE - 1)
+       // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
+       // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
+       dep r24 = 0,loc7,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       cmp.ne p6,p0=r24,r2             // check overlap with xen heap
+       ;; 
+(p6)   cmp.ne.unc p7,p0=r21,loc7       // check overlap with current stack
+       ;;
+(p7)   cmp.ne.unc p8,p0=r17,loc7       // check overlap with guest_vhpt
+       ;;
+       // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
+       or loc7 = r25,loc7          // construct PA | page properties
+       ;;
+       cmp.ne p9,p0=loc6,loc7
+       mov r22=IA64_TR_VPD
+       mov r24=IA64_TR_MAPPED_REGS
+       mov r23=IA64_GRANULE_SHIFT<<2
+       ;;
+(p9)   ptr.i   in3,r23 
+(p8)   ptr.d   in3,r23
+       mov cr.itir=r23
+       mov cr.ifa=in3
+       ;;
+(p9)   itr.i itr[r22]=loc7         // wire in new mapping...
+       ;;
+(p8)   itr.d dtr[r24]=loc7         // wire in new mapping...
+       ;;
+.privregs_overlaps:
+
        // done, switch back to virtual and return
        mov r16=loc4                    // r16= original psr
        br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
index 38d293f711b61b6595d12216b134f4c18812bb4f..2c3a9c0a222f57833be51debebadb7c16ac75506 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/viosapic.h>
 #include <xen/event.h>
 #include <asm/vlsapic.h>
+#include <asm/vhpt.h>
 #include "entry.h"
 
 /* Global flag to identify whether Intel vmx feature is on */
@@ -150,20 +151,21 @@ typedef union {
        };
 } cpuid3_t;
 
-/* Allocate vpd from xenheap */
+/* Allocate vpd from domheap */
 static vpd_t *alloc_vpd(void)
 {
        int i;
        cpuid3_t cpuid3;
+       struct page_info *page;
        vpd_t *vpd;
        mapped_regs_t *mregs;
 
-       vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
-       if (!vpd) {
+       page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
+       if (page == NULL) {
                printk("VPD allocation failed.\n");
                return NULL;
        }
-       vpd = (vpd_t *)virt_to_xenva(vpd);
+       vpd = page_to_virt(page);
 
        printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
               vpd, sizeof(vpd_t));
@@ -191,12 +193,79 @@ static vpd_t *alloc_vpd(void)
        return vpd;
 }
 
-/* Free vpd to xenheap */
+/* Free vpd to domheap */
 static void
 free_vpd(struct vcpu *v)
 {
        if ( v->arch.privregs )
-               free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
+               free_domheap_pages(virt_to_page(v->arch.privregs),
+                                  get_order(VPD_SIZE));
+}
+
+// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
+// so that we don't have to pin the vpd down with itr[].
+void
+__vmx_vpd_pin(struct vcpu* v)
+{
+       unsigned long privregs = (unsigned long)v->arch.privregs;
+       u64 psr;
+       
+       // check overlapping with xenheap
+       if ((privregs &
+            ~(KERNEL_TR_PAGE_SIZE - 1)) ==
+           ((unsigned long)__va(ia64_tpa(current_text_addr())) &
+            ~(KERNEL_TR_PAGE_SIZE - 1)))
+               return;
+               
+       privregs &= ~(IA64_GRANULE_SIZE - 1);
+
+       // check overlapping with current stack
+       if (privregs ==
+           ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
+               return;
+
+       if (!VMX_DOMAIN(current)) {
+               // check overlapping with vhpt
+               if (privregs ==
+                   (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+       } else {
+               // check overlapping with vhpt
+               if (privregs ==
+                   ((unsigned long)current->arch.vhpt.hash &
+                    ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+
+               // check overlapping with privregs
+               if (privregs ==
+                   ((unsigned long)current->arch.privregs &
+                    ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+       }
+
+       psr = ia64_clear_ic();
+       ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
+       ia64_srlz_d();
+       ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
+                pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
+                IA64_GRANULE_SHIFT);
+       ia64_set_psr(psr);
+       ia64_srlz_d();
+}
+
+void
+__vmx_vpd_unpin(struct vcpu* v)
+{
+       if (!VMX_DOMAIN(current)) {
+               int rc;
+               rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
+               BUG_ON(rc);
+       } else {
+               IA64FAULT fault;
+               fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
+                                       VMX(current, vrr[VRN7]));
+               BUG_ON(fault != IA64_NO_FAULT);
+       }
 }
 
 /*
@@ -212,7 +281,11 @@ vmx_create_vp(struct vcpu *v)
        /* ia64_ivt is function pointer, so need this tranlation */
        ivt_base = (u64) &vmx_ia64_ivt;
        printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);
+
+       vmx_vpd_pin(v);
        ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
+       vmx_vpd_unpin(v);
+       
        if (ret != PAL_STATUS_SUCCESS){
                panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
        }
@@ -224,6 +297,7 @@ vmx_save_state(struct vcpu *v)
 {
        u64 status;
 
+       BUG_ON(v != current);
        /* FIXME: about setting of pal_proc_vector... time consuming */
        status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS){
@@ -250,6 +324,7 @@ vmx_load_state(struct vcpu *v)
 {
        u64 status;
 
+       BUG_ON(v != current);
        status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS){
                panic_domain(vcpu_regs(v),"Restore vp status failed\n");
@@ -518,6 +593,7 @@ void vmx_do_resume(struct vcpu *v)
        ioreq_t *p;
 
        vmx_load_all_rr(v);
+       vmx_load_state(v);
        migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
 
        /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
index 15693298e8cef69ab40723a92a603707df460695..410454b578e1bb90395f91e5cef13ac35c31e6e6 100644 (file)
@@ -138,7 +138,6 @@ extern void * pal_vaddr;
 void
 vmx_load_all_rr(VCPU *vcpu)
 {
-       unsigned long psr;
        unsigned long rr0, rr4;
 
        switch (vcpu->arch.arch_vmx.mmu_mode) {
@@ -158,8 +157,6 @@ vmx_load_all_rr(VCPU *vcpu)
                panic_domain(NULL, "bad mmu mode value");
        }
 
-       psr = ia64_clear_ic();
-
        ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
@@ -175,13 +172,12 @@ vmx_load_all_rr(VCPU *vcpu)
        ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
        ia64_dv_serialize_data();
        vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
-                      (void *)vcpu->arch.vhpt.hash, pal_vaddr);
+                      (void *)vcpu->arch.vhpt.hash,
+                      pal_vaddr, vcpu->arch.privregs);
        ia64_set_pta(VMX(vcpu, mpta));
        vmx_ia64_set_dcr(vcpu);
 
        ia64_srlz_d();
-       ia64_set_psr(psr);
-       ia64_srlz_i();
 }
 
 void
index fbb97bfe9cdccde4f668c5f71bfe2c0e883948f4..6e1ca276579cac6d2e33d7f19cbb8efe7ba70e91 100644 (file)
@@ -181,8 +181,8 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
     switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         if (likely(vcpu == current))
-            vmx_switch_rr7(vrrtomrr(vcpu,val),
-                           (void *)vcpu->arch.vhpt.hash, pal_vaddr );
+            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
+                           pal_vaddr, vcpu->arch.privregs);
        break;
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
index ac2ab12700420afb45f6648d6e52000a7c86cd6d..c9cc316fd283c620bebe6f44bd9b66df1912a2f1 100644 (file)
@@ -241,8 +241,6 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
             ia64_setreg(_IA64_REG_CR_DCR, dcr);
         }
     }
-    if (VMX_DOMAIN(next))
-        vmx_load_state(next);
 
     ia64_disable_vhpt_walker();
     lazy_fp_switch(prev, current);
@@ -261,6 +259,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
     if (VMX_DOMAIN(current)) {
         vmx_load_all_rr(current);
+        vmx_load_state(current);
         migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
                       current->processor);
     } else {
index 0106a64c483b137933ee5f25a9d428e982a70f1d..f19ebca015017840c05b553f39e223266660dffb 100644 (file)
@@ -114,7 +114,7 @@ extern void memwrite_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
                        size_t s);
 extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern void vmx_switch_rr7(unsigned long, void *, void *);
+extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
 
 extern void dtlb_fault(VCPU * vcpu, u64 vadr);
 extern void nested_dtlb(VCPU * vcpu);
index d69bd96e8990bda8f10c4aa9fe2904c0315caeaf..23c9f049a0dc6527c7ef770f82f0686ec9a5d763 100644 (file)
@@ -80,6 +80,24 @@ struct arch_vmx_struct {
 
 #define ARCH_VMX_DOMAIN         0       /* Need it to indicate VTi domain */
 
+/* pin/unpin vpd area for PAL call with DTR[] */
+void __vmx_vpd_pin(struct vcpu* v);
+void __vmx_vpd_unpin(struct vcpu* v); 
+
+static inline void vmx_vpd_pin(struct vcpu* v)
+{
+    if (likely(v == current))
+        return;
+    __vmx_vpd_pin(v);
+}
+
+static inline void vmx_vpd_unpin(struct vcpu* v)
+{
+    if (likely(v == current))
+        return;
+    __vmx_vpd_unpin(v);
+}
+
 #endif //__ASSEMBLY__
 
 // VPD field offset
index 432387ac9868e2b571f7868a8a16fc430b1253ad..6082c02a3f22d3cc56661c76ff327792595ea504 100644 (file)
@@ -9,6 +9,8 @@
 #define IA64_TR_MAPPED_REGS    5       /* dtr5: vcpu mapped regs */
 #define        IA64_TR_VHPT            6       /* dtr6: vhpt */
 
+#define IA64_TR_VPD            2       /* itr2: vpd */
+
 #define IA64_DTR_GUEST_KERNEL   7
 #define IA64_ITR_GUEST_KERNEL   2
 /* Processor status register bits: */